static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
- u32 *cpu_exec_control = &v->arch.hvm_vmx.exec_control;
u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;
ASSERT(intack.source != hvm_intsrc_none);
ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
}
- if ( !(*cpu_exec_control & ctl) )
+ if ( !(v->arch.hvm_vmx.exec_control & ctl) )
{
- *cpu_exec_control |= ctl;
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
+ v->arch.hvm_vmx.exec_control |= ctl;
+ vmx_update_cpu_exec_control(v);
}
}
if ( unlikely(v->arch.hvm_vcpu.single_step) )
{
v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+ vmx_update_cpu_exec_control(v);
return;
}
#endif /* __i386__ */
+/*
+ * Flush the vCPU's cached primary processor-based VM-execution controls
+ * (v->arch.hvm_vmx.exec_control) into the VMCS.
+ *
+ * Callers modify the cached exec_control field first and then invoke this
+ * helper instead of calling __vmwrite() on CPU_BASED_VM_EXEC_CONTROL
+ * directly, so every update goes through a single choke point.
+ * NOTE(review): assumes the vCPU's VMCS is currently loaded (callers appear
+ * to be inside vmx_vmcs_enter()/vmx_vmcs_exit() or on the current vCPU) --
+ * confirm against the full file.
+ */
+void vmx_update_cpu_exec_control(struct vcpu *v)
+{
+    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+}
+
static void vmx_update_secondary_exec_control(struct vcpu *v)
{
__vmwrite(SECONDARY_VM_EXEC_CONTROL,
/* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
v->arch.hvm_vcpu.flag_dr_dirty = 0;
v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+ vmx_update_cpu_exec_control(v);
v->arch.guest_context.debugreg[0] = read_debugreg(0);
v->arch.guest_context.debugreg[1] = read_debugreg(1);
v->arch.hvm_vmx.exec_control &= ~CPU_BASED_RDTSC_EXITING;
if ( enable )
v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+ vmx_update_cpu_exec_control(v);
vmx_vmcs_exit(v);
}
v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
if ( !hvm_paging_enabled(v) )
v->arch.hvm_vmx.exec_control |= cr3_ctls;
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+ vmx_update_cpu_exec_control(v);
/* Changing CR0.PE can change some bits in real CR4. */
vmx_update_guest_cr(v, 4);
/* Allow guest direct access to DR registers */
v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+ vmx_update_cpu_exec_control(v);
}
static void vmx_invlpg_intercept(unsigned long vaddr)
case EXIT_REASON_PENDING_VIRT_INTR:
/* Disable the interrupt window. */
v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
- v->arch.hvm_vmx.exec_control);
+ vmx_update_cpu_exec_control(v);
break;
case EXIT_REASON_PENDING_VIRT_NMI:
/* Disable the NMI window. */
v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
- v->arch.hvm_vmx.exec_control);
+ vmx_update_cpu_exec_control(v);
break;
case EXIT_REASON_TASK_SWITCH: {
const enum hvm_task_switch_reason reasons[] = {
case EXIT_REASON_MONITOR_TRAP_FLAG:
v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
- __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+ vmx_update_cpu_exec_control(v);
if ( v->domain->debugger_attached && v->arch.hvm_vcpu.single_step )
domain_pause_for_debugger();
break;